-
/******************************************************************************
* dom0_ops.c
*
}
break;
- case DOM0_MAPTASK:
+ case DOM0_GETMEMLIST:
{
- unsigned int dom = op.u.mapdomts.domain;
-
- op.u.mapdomts.ts_phy_addr = __pa(find_domain_by_id(dom));
- copy_to_user(u_dom0_op, &op, sizeof(op));
+ int i;
+ unsigned long pfn = op.u.getmemlist.start_pfn;
+ unsigned long *buffer = op.u.getmemlist.buffer;
+ for ( i = 0; i < op.u.getmemlist.num_pfns; i++ )
+ {
+ /* XXX We trust DOM0 to give us a safe buffer. XXX */
+ *buffer++ = pfn;
+ pfn = (frame_table + pfn)->next;
+ }
}
break;
unsigned long phys_l1tab, phys_l2tab;
unsigned long cur_address, alloc_address;
unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
- unsigned long virt_ftable_start, virt_ftable_end, ft_mapping;
start_info_t *virt_startinfo_address;
unsigned long long time;
unsigned long count;
unsigned long alloc_index;
- unsigned long ft_pages;
l2_pgentry_t *l2tab, *l2start;
l1_pgentry_t *l1tab = NULL, *l1start = NULL;
struct pfn_info *page = NULL;
p->mm.pagetable = mk_pagetable(phys_l2tab);
/*
- * NB. The upper limit on this loop does one extra page + pages for frame
- * table. This is to make sure a pte exists when we want to map the
- * shared_info struct and frame table struct.
+ * NB. The upper limit on this loop does one extra page. This is to make
+ * sure a pte exists when we want to map the shared_info struct.
*/
- ft_pages = frame_table_size >> PAGE_SHIFT;
l2tab += l2_table_offset(virt_load_address);
cur_address = p->pg_head << PAGE_SHIFT;
- for ( count = 0;
- count < p->tot_pages + 1 + ft_pages;
- count++)
+ for ( count = 0; count < p->tot_pages + 1; count++ )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
}
*l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
- if(count < p->tot_pages)
+ if ( count < p->tot_pages )
{
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_writeable_page;
page->type_count = page->tot_count = 1;
+ /* Set up the MPT entry. */
+ machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
}
cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
/* pages that are part of page tables must be read only */
cur_address = p->pg_head << PAGE_SHIFT;
- for(count = 0;
- count < alloc_index;
- count++){
+ for ( count = 0; count < alloc_index; count++ )
+ {
cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
}
l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
l1tab += l1_table_offset(virt_load_address + (alloc_index << PAGE_SHIFT));
l2tab++;
- for(count = alloc_index;
- count < p->tot_pages;
- count++){
+ for ( count = alloc_index; count < p->tot_pages; count++ )
+ {
*l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
- if(!((unsigned long)l1tab & (PAGE_SIZE - 1))){
+ if( !((unsigned long)l1tab & (PAGE_SIZE - 1)) )
+ {
unmap_domain_mem(l1start);
l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
l2tab++;
virt_startinfo_address = (start_info_t *)
(virt_load_address + ((alloc_index - 1) << PAGE_SHIFT));
virt_stack_address = (unsigned long)virt_startinfo_address;
-
- /* set up frame_table mapping */
- ft_mapping = (unsigned long)frame_table;
- virt_ftable_start = virt_shinfo_address + PAGE_SIZE;
- virt_ftable_end = virt_ftable_start + frame_table_size;
- for(cur_address = virt_ftable_start;
- cur_address < virt_ftable_end;
- cur_address += PAGE_SIZE){
- l2tab = l2start + l2_table_offset(cur_address);
- l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l1tab += l1_table_offset(cur_address);
- *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
- unmap_domain_mem(l1start);
- ft_mapping += PAGE_SIZE;
- }
unmap_domain_mem(l2start);
virt_startinfo_address->pt_base = virt_load_address +
((p->tot_pages - 1) << PAGE_SHIFT);
virt_startinfo_address->phys_base = p->pg_head << PAGE_SHIFT;
- virt_startinfo_address->frame_table = virt_ftable_start;
/* Add virtual network interfaces and point to them in startinfo. */
while (params->num_vifs-- > 0) {
#include <asm/uaccess.h>
#include <asm/domain_page.h>
-#if 0
+#if 1
#define MEM_LOG(_f, _a...) printk("DOM%d: (file=memory.c, line=%d) " _f "\n", current->domain, __LINE__, ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
break;
}
}
+ else
+ {
+ MEM_LOG("Bad domain normal update (dom %d, pfn %ld)",
+ current->domain, pfn);
+ }
break;
case PGREQ_MPT_UPDATE:
if ( DOMAIN_OKAY(page->flags) )
{
machine_to_phys_mapping[pfn] = cur->val;
+ err = 0;
+ }
+ else
+ {
+ MEM_LOG("Bad domain MPT update (dom %d, pfn %ld)",
+ current->domain, pfn);
}
break;
net_ring_t *net_rings;
int num_net_rings;
blk_ring_t *blk_ring; /* block io communication rings */
- unsigned long frame_table;
unsigned char cmd_line[1]; /* variable-length */
} start_info_t;
-
/******************************************************************************
* dom0_ops.h
*
#define DOM0_NEWDOMAIN 0
#define DOM0_KILLDOMAIN 1
-#define DOM0_MAPTASK 2
+#define DOM0_GETMEMLIST 2
#define DOM0_STARTDOM 4
#define MAX_CMD_LEN 256
unsigned int domain;
} dom0_killdomain_t;
/*
 * DOM0_GETMEMLIST: ask the hypervisor for a batch of the calling domain's
 * machine page frame numbers, starting at start_pfn and following the
 * frame_table 'next' chain.  Replaces the old dom0_map_ts interface.
 */
typedef struct dom0_getmemlist_st
{
    unsigned long start_pfn;    /* first pfn of the chain to walk */
    unsigned long num_pfns;     /* number of pfns to copy into buffer */
    void *buffer;               /* guest-supplied array of num_pfns longs;
                                 * NB. the hypervisor trusts DOM0 here */
} dom0_getmemlist_t;
typedef struct domain_launch
{
{
dom0_newdomain_t newdomain;
dom0_killdomain_t killdomain;
- dom0_tsmap_t mapdomts;
+ dom0_getmemlist_t getmemlist;
dom_meminfo_t meminfo;
}
u;
#define MAP_DISCONT 1
-frame_table_t * frame_table;
-
static struct proc_dir_entry *xeno_base;
static struct proc_dir_entry *dom0_cmd_intf;
static struct proc_dir_entry *proc_ft;
static int __init init_module(void)
{
- frame_table = (frame_table_t *)start_info.frame_table;
-
/* xeno proc root setup */
xeno_base = proc_mkdir(XENO_BASE, &proc_root);
-
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <asm/mmu.h>
#include "hypervisor_defs.h"
+#include "dom0_ops.h"
#define MAP_CONT 0
#define MAP_DISCONT 1
/*
 * Map a discontiguous run of a domain's machine pages into the vaddr range
 * starting at 'from'.  The pfn chain is fetched from the hypervisor in
 * batches via DOM0_GETMEMLIST, using one scratch page as the pfn buffer.
 *
 * from      - starting kernel virtual address of the mapping.
 * first_pg  - first machine pfn of the domain's page chain.
 * tot_pages - number of pages to map.
 * prot      - page protection for the new ptes.
 *
 * Returns the number of pages left unmapped (0 on full success).
 */
int direct_remap_disc_page_range(unsigned long from,
                 unsigned long first_pg, int tot_pages, pgprot_t prot)
{
    dom0_op_t dom0_op;
    unsigned long *pfns;
    unsigned long start = from;
    int pages, i;

    /* get_free_page() returns an address as unsigned long, not a pointer. */
    pfns = (unsigned long *)get_free_page(GFP_KERNEL);
    if ( pfns == NULL )
        return tot_pages;   /* no scratch page => nothing mapped */

    while ( tot_pages != 0 )
    {
        dom0_op.cmd = DOM0_GETMEMLIST;
        dom0_op.u.getmemlist.start_pfn = first_pg;
        /*
         * Fetch 1024 pfns but map only 1023: slot [1023] is kept as the
         * start_pfn for the next batch (the hypervisor includes start_pfn
         * itself as the first entry, so no pfn is skipped or repeated).
         */
        pages = 1023;
        dom0_op.u.getmemlist.num_pfns = 1024;
        if ( tot_pages < 1024 )
            dom0_op.u.getmemlist.num_pfns = pages = tot_pages;
        dom0_op.u.getmemlist.buffer = pfns;
        (void)HYPERVISOR_dom0_op(&dom0_op);
        /* Harmless on the final short batch: the value is never used. */
        first_pg = pfns[1023];

        for ( i = 0; i < pages; i++ )
        {
            if ( direct_remap_page_range(start, pfns[i] << PAGE_SHIFT,
                                         PAGE_SIZE, prot) )
                goto out;
            start += PAGE_SIZE;
            tot_pages--;
        }
    }

 out:
    /* free_page() takes the page's address as an unsigned long. */
    free_page((unsigned long)pfns);
    return tot_pages;
}
/* below functions replace standard sys_mmap and sys_munmap which are absolutely useless
}
/* add node on the list of directly mapped areas, make sure the
- * list remains sorted.
- */
+ * list remains sorted.
+ */
dmmap = (direct_mmap_node_t *)kmalloc(sizeof(direct_mmap_node_t), GFP_KERNEL);
dmmap->vm_start = addr;
dmmap->vm_end = addr + size;
#define DOM0_NEWDOMAIN 0
#define DOM0_KILLDOMAIN 1
-#define DOM0_MAPTASK 2
+#define DOM0_GETMEMLIST 2
#define MAP_DOM_MEM 3
#define DOM0_STARTDOM 4
#define MAX_CMD 4
unsigned int domain;
} dom0_killdomain_t;
/*
 * DOM0_GETMEMLIST: fetch a batch of this domain's machine page frame
 * numbers from the hypervisor, starting at start_pfn and following the
 * frame_table chain.  Must stay layout-identical to the hypervisor's
 * copy of this struct.  Replaces the old dom0_map_ts interface.
 */
typedef struct dom0_getmemlist_st
{
    unsigned long start_pfn;    /* first pfn of the chain to walk */
    unsigned long num_pfns;     /* number of pfns to copy into buffer */
    void *buffer;               /* caller-supplied array of num_pfns longs */
} dom0_getmemlist_t;
typedef struct dom_mem
{
{
dom0_newdomain_t newdomain;
dom0_killdomain_t killdomain;
- dom0_tsmap_t mapdomts;
+ dom0_getmemlist_t getmemlist;
dom_mem_t dommem;
dom_meminfo_t meminfo;
}
*/
-/* original version: xen-2.4.16/include/xeno/mm.h */
-typedef struct pfn_info {
- struct list_head list; /* ->mapping has some page lists. */
- unsigned long next; /* used for threading pages belonging */
- unsigned long prev; /* to same domain */
- unsigned long flags; /* atomic flags. */
- unsigned long tot_count; /* Total domain usage count. */
- unsigned long type_count; /* pagetable/dir, or domain-writeable refs. */
-} frame_table_t;
-
-extern frame_table_t * frame_table;
-
typedef struct proc_data {
unsigned int domain;
unsigned long map_size;